v->arch.guest_table_user = mk_pagetable(0);
}
-
- if ( hvm_guest(v) )
- hvm_relinquish_guest_resources(v);
}
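+    /* HVM state is shared by all VCPUs; relinquish it once for the whole domain. */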
+ if ( hvm_guest(d->vcpu[0]) )
+ hvm_relinquish_guest_resources(d);
+
shadow_mode_disable(d);
/*
int vector, int has_code);
void svm_dump_regs(const char *from, struct cpu_user_regs *regs);
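+/* Forward declaration; svm_relinquish_guest_resources() is defined further down. */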
+static void svm_relinquish_guest_resources(struct domain *d);
+
static struct asid_pool ASIDpool[NR_CPUS];
/*
return 1;
}
-int svm_relinquish_guest_resources(struct vcpu *v)
-{
- svm_relinquish_resources(v);
- return 1;
-}
-
void svm_store_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
}
-void svm_relinquish_resources(struct vcpu *v)
+static void svm_relinquish_guest_resources(struct domain *d)
{
- struct hvm_virpit *vpit;
extern void destroy_vmcb(struct arch_svm_struct *); /* XXX */
+    struct vcpu *v;
+
+ for_each_vcpu ( d, v )
+ {
#if 0
- /*
- * This is not stored at the moment. We need to keep it somewhere and free
- * it Or maybe not, as it's a per-cpu-core item, and I guess we don't
- * normally remove CPU's other than for hot-plug capable systems, where I
- * guess we have to allocate and free host-save area in this case. Let's
- * not worry about it at the moment, as loosing one page per CPU hot-plug
- * event doesn't seem that excessive. But I may be wrong.
- */
- free_host_save_area(v->arch.hvm_svm.host_save_area);
+        /* Memory leak by not freeing this. XXXKAF: *Why* is this not per-core?? */
+ free_host_save_area(v->arch.hvm_svm.host_save_area);
#endif
- if ( v->vcpu_id == 0 )
- {
- /* unmap IO shared page */
- struct domain *d = v->domain;
- if ( d->arch.hvm_domain.shared_page_va )
- unmap_domain_page_global(
- (void *)d->arch.hvm_domain.shared_page_va);
- shadow_direct_map_clean(d);
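+        /* Per-VCPU cleanup: VMCB, monitor page table, HLT timer and vlapic. */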
+ destroy_vmcb(&v->arch.hvm_svm);
+ free_monitor_pagetable(v);
+ kill_timer(&v->arch.hvm_svm.hlt_timer);
+ if ( hvm_apic_support(v->domain) && (VLAPIC(v) != NULL) )
+ {
+            kill_timer(&VLAPIC(v)->vlapic_timer);
+ xfree(VLAPIC(v));
+ }
}
- destroy_vmcb(&v->arch.hvm_svm);
- free_monitor_pagetable(v);
- vpit = &v->domain->arch.hvm_domain.vpit;
- kill_timer(&vpit->pit_timer);
- kill_timer(&v->arch.hvm_svm.hlt_timer);
- if ( hvm_apic_support(v->domain) && (VLAPIC(v) != NULL) )
- {
- kill_timer( &(VLAPIC(v)->vlapic_timer) );
- xfree( VLAPIC(v) );
- }
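+    /* Per-domain cleanup: PIT timer, shared I/O page mapping, direct-map shadow. */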
+ kill_timer(&d->arch.hvm_domain.vpit.pit_timer);
+
+ if ( d->arch.hvm_domain.shared_page_va )
+ unmap_domain_page_global(
+ (void *)d->arch.hvm_domain.shared_page_va);
+
+ shadow_direct_map_clean(d);
}
}
}
-void vmx_relinquish_resources(struct vcpu *v)
+static void vmx_relinquish_guest_resources(struct domain *d)
{
- struct hvm_virpit *vpit;
+    struct vcpu *v;
+
- if (v->vcpu_id == 0) {
- /* unmap IO shared page */
- struct domain *d = v->domain;
- if ( d->arch.hvm_domain.shared_page_va )
- unmap_domain_page_global(
- (void *)d->arch.hvm_domain.shared_page_va);
- shadow_direct_map_clean(d);
- }
-
- vmx_request_clear_vmcs(v);
- destroy_vmcs(&v->arch.hvm_vmx);
- free_monitor_pagetable(v);
- vpit = &v->domain->arch.hvm_domain.vpit;
- kill_timer(&vpit->pit_timer);
- kill_timer(&v->arch.hvm_vmx.hlt_timer);
- if ( hvm_apic_support(v->domain) && (VLAPIC(v) != NULL) )
+ for_each_vcpu ( d, v )
{
- kill_timer(&VLAPIC(v)->vlapic_timer);
- xfree(VLAPIC(v));
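+        /* Per-VCPU cleanup: VMCS, monitor page table, HLT timer and vlapic. */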
+ vmx_request_clear_vmcs(v);
+ destroy_vmcs(&v->arch.hvm_vmx);
+ free_monitor_pagetable(v);
+ kill_timer(&v->arch.hvm_vmx.hlt_timer);
+ if ( hvm_apic_support(v->domain) && (VLAPIC(v) != NULL) )
+ {
+ kill_timer(&VLAPIC(v)->vlapic_timer);
+ xfree(VLAPIC(v));
+ }
}
+
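+    /* Per-domain cleanup: PIT timer, shared I/O page mapping, direct-map shadow. */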
+ kill_timer(&d->arch.hvm_domain.vpit.pit_timer);
+
+ if ( d->arch.hvm_domain.shared_page_va )
+ unmap_domain_page_global(
+ (void *)d->arch.hvm_domain.shared_page_va);
+
+ shadow_direct_map_clean(d);
}
#ifdef __x86_64__
return 1;
}
-int vmx_relinquish_guest_resources(struct vcpu *v)
-{
- vmx_relinquish_resources(v);
- return 1;
-}
-
void vmx_migrate_timers(struct vcpu *v)
{
struct hvm_virpit *vpit = &(v->domain->arch.hvm_domain.vpit);
/*
 * Initialize/relinquish HVM guest resources
*/
- int (*initialize_guest_resources)(struct vcpu *v);
- int (*relinquish_guest_resources)(struct vcpu *v);
+ int (*initialize_guest_resources)(struct vcpu *v);
+ void (*relinquish_guest_resources)(struct domain *d);
/*
* Store and load guest state:
static inline void
hvm_disable(void)
{
- if (hvm_funcs.disable)
- hvm_funcs.disable();
+ if ( hvm_funcs.disable )
+ hvm_funcs.disable();
}
static inline int
hvm_initialize_guest_resources(struct vcpu *v)
{
- if (hvm_funcs.initialize_guest_resources)
- return hvm_funcs.initialize_guest_resources(v);
+ if ( hvm_funcs.initialize_guest_resources )
+ return hvm_funcs.initialize_guest_resources(v);
return 0;
}
-static inline int
-hvm_relinquish_guest_resources(struct vcpu *v)
+static inline void
+hvm_relinquish_guest_resources(struct domain *d)
{
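+    /* No-op when no HVM implementation has registered a handler. */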
-    if (hvm_funcs.relinquish_guest_resources)
-        return hvm_funcs.relinquish_guest_resources(v);
-    return 0;
+    if ( hvm_funcs.relinquish_guest_resources )
+        hvm_funcs.relinquish_guest_resources(d);
}
static inline void
hvm_funcs.restore_msrs(v);
}
#else
-#define hvm_save_segments(v) ((void)0)
-#define hvm_load_msrs(v) ((void)0)
-#define hvm_restore_msrs(v) ((void)0)
+#define hvm_save_segments(v) ((void)0)
+#define hvm_load_msrs(v) ((void)0)
+#define hvm_restore_msrs(v) ((void)0)
#endif /* __x86_64__ */
static inline void
extern void svm_vmwrite(struct vcpu *v, int index, unsigned long value);
extern void svm_final_setup_guest(struct vcpu *v);
extern int svm_paging_enabled(struct vcpu *v);
-extern void svm_relinquish_resources(struct vcpu *v);
extern void svm_dump_vmcb(const char *from, struct vmcb_struct *vmcb);
extern void svm_stts(struct vcpu *v);
extern void svm_do_launch(struct vcpu *v);
extern void stop_vmx(void);
void vmx_final_setup_guest(struct vcpu *v);
-void vmx_relinquish_resources(struct vcpu *v);
void vmx_enter_scheduler(void);